1 Problem A - Euler-Maruyama Method

First we implement a discretization for the Black-Scholes-Merton price dynamics with the given parameter set, with \(n = 250\) steps. We generate \(M_1 = 10\), \(M_2 = 100\), \(M_3 = 1000\), \(M_4 = 10000\) and \(M_5 = 100000\) paths and plot the paths in separate figures.

# Problem A parameter set for the Black-Scholes-Merton dynamics.
set.seed(061999) # Reproducibility of all simulations below.
s0 <- 12 # Initial asset price S_0.
T1 <- 1 # Time horizon (maturity), in years.
mu <- 0.03 # Drift.
sigma <- 0.17 # Volatility.
n <-  250 # Number of time steps in the discretization.
t <- seq(0, T1, length.out = n+1) # n steps means n+1 points in this vector. 
h <- t[2] - t[1] # h = T1/n gives the same result. 
# Numbers of Monte Carlo paths to compare.
M1 <- 10
M2 <- 100
M3 <- 1000
M4 <- 10000
M5 <- 100000
# Price path stochastic process. 
# Price path stochastic process: exact simulation of geometric Brownian
# motion, S_t = s0 * exp((mu - sigma^2/2) * t + sigma * W_t), on the grid t.
#
# s0:    initial price.
# t:     vector of time points; t[1] is expected to be 0.
# mu:    drift.
# sigma: volatility.
# Returns one simulated path: the price at every point of t.
#
# Fixes vs the original:
#  * W_t ~ N(0, t) has standard deviation sqrt(t), not t — the original used
#    sd = t, which inflates the variance (this is why the test paths "varied
#    far too much" against slide 33).
#  * A Brownian path has correlated values across time, so W is built from
#    cumulative sums of independent increments with variance diff(t), not
#    from independent draws at each grid point.
price.path <- function(s0, t, mu, sigma){
  dW <- rnorm(length(t) - 1, mean = 0, sd = sqrt(diff(t))) # Brownian increments.
  W <- c(0, cumsum(dW)) # Brownian path with W(0) = 0.
  s0*exp(mu*t)*exp(sigma*W-sigma^2/2*t)
}
# Simulate the requested number of price paths for each sample size.
# Each matrix holds one path per row and n+1 grid points per column;
# replicate() calls price.path once per row, exactly as the former loops did.
M1.paths <- t(replicate(M1, price.path(s0 = s0, t = t, mu = mu, sigma = sigma)))

M2.paths <- t(replicate(M2, price.path(s0, t, mu, sigma)))

M3.paths <- t(replicate(M3, price.path(s0, t, mu, sigma)))

M4.paths <- t(replicate(M4, price.path(s0, t, mu, sigma)))

M5.paths <- t(replicate(M5, price.path(s0, t, mu, sigma)))
#plot(NULL, NULL, main = paste0(M1," paths"), xlim = c(0,1), ylim = c(0, 50), ylab = "S", xlab = "t")

#lines(M1.paths, col = 1:M1)
#df1 <- tibble(M1.paths)
#df1 %>% ggplot()

# TODO: could redo these plots with ggplot later, and put time on the
# x-axis instead of the discretization index.
matplot(t(M1.paths), type = "l", main = paste0(M1," paths"), xlab = "Discretization Points", ylab = "S")

# Same plot for M2 paths, with the empirical mean path and the analytical
# mean E(S_t) = s0*exp(mu*t) overlaid for comparison.
#M2.p <- tibble(M2.paths)
matplot(t(M2.paths), type = "l", main = paste0(M2," paths"), xlab = "Discretization Points", ylab = "S")
lines(apply(M2.paths, 2, mean), cex = 2)
lines(s0*exp(mu*t))

matplot(t(M3.paths), type = "l", main = paste0(M3," paths"), xlab = "Discretization Points", ylab = "S")
Testing code below, comparing against the example on slide 33. Something appears to be wrong!

# Test reproducing the example from slide 33 (Section 2.2).
# NOTE(review): this block overwrites the globals t (now spanning [0, 5])
# and s0 (now 10), which silently affects every later chunk that reuses them.
n2 <- 100
Mtest.paths <- matrix(rep(NA, length.out = M2*(n2+1)), nrow = M2)
t <- seq(0, 5, length.out = n2+1)
s0 <- 10
for (i in 1:M2){
  Mtest.paths[i,] <- price.path(s0 = s0, t = t, 
                                mu = 0.05, sigma = 0.05) 
}
#lines(M1.paths, col = 1:M1) 
#df2 <- tibble(M2.paths)
#df1 %>% ggplot()

# Column means across the paths, i.e. the empirical E(S_t) per grid point.
m <- apply(Mtest.paths, 2, mean)

# TODO: could redo this plot with ggplot later, and put time on the x-axis.
matplot(t(Mtest.paths), type = "l", main = paste0(M2," paths"), xlab = "Discretization Points", ylab = "S", ylim = c(0,20))
lines(m) # Empirical mean path.
lines(s0*exp(0.05*t)) # Analytical mean E(S_t) = s0*exp(mu*t).

Obviously something is wrong here! The paths seem to vary far too much!

Testing: I tried to make this resemble the plot on slide 33 in Section 2.2, but my paths vary far too much! There is possibly something wrong in the path-generation code!

The Monte Carlo estimator for \(\hat{S}_T\) is calculated separately for each of the values of \(M_i\). A 95% confidence interval (i.e. 5% significance level, matching the `qnorm(0.975)` quantile used in the code) is provided for each estimator. A comparison between these estimators and the analytical solution of \(\mathbb{E}(S_T)\) is done and differences are explained.

# Monte Carlo estimator of S_T: the sample mean of the terminal value over
# all simulated paths.
#
# M: matrix of simulated paths, one path per row and n+1 columns.
# n: number of time steps; now defaults to ncol(M) - 1 so the terminal
#    column is picked up automatically (generalization; old calls that pass
#    n explicitly behave exactly as before).
hat_ST <- function(M, n = ncol(M) - 1){
  mean(M[,n+1]) # Terminal column = price at maturity, averaged over paths.
}

# Normal-approximation confidence interval for the MC estimate of E(S_T).
#
# M:     matrix of simulated paths, one path per row.
# n:     number of time steps; defaults to ncol(M) - 1 (terminal column).
# level: confidence level; defaults to 0.95, which reproduces the original
#        hard-coded qnorm(0.975) quantile exactly.
# Returns list(l = lower bound, u = upper bound).
CI_ST <- function(M, n = ncol(M) - 1, level = 0.95){
  x <- M[, n + 1] # Terminal value of every path.
  m <- mean(x) # MC point estimate (same as hat_ST).
  s <- sd(x) # Sample standard deviation of the terminal values.
  ste <- qnorm(1 - (1 - level) / 2) * s / sqrt(nrow(M)) # z * standard error.
  return(list(l = m - ste, u = m + ste))
}
# Point estimate and 95% CI of E(S_T) for each number of paths M1..M5,
# all on the same n = 250 grid.
M1.hat <- hat_ST(M1.paths, n) 
CI1 <- CI_ST(M1.paths, n)
M2.hat <- hat_ST(M2.paths, n) 
CI2 <- CI_ST(M2.paths, n)
M3.hat <- hat_ST(M3.paths, n) 
CI3 <- CI_ST(M3.paths, n)
M4.hat <- hat_ST(M4.paths, n) 
CI4 <- CI_ST(M4.paths, n)
M5.hat <- hat_ST(M5.paths, n) 
CI5 <- CI_ST(M5.paths, n)

# One column per M: estimate, lower CI bound, upper CI bound.
col1 <- rbind(M1.hat, CI1$l, CI1$u)
col2 <- rbind(M2.hat, CI2$l, CI2$u)
col3 <- rbind(M3.hat, CI3$l, CI3$u)
col4 <- rbind(M4.hat, CI4$l, CI4$u)
col5 <- rbind(M5.hat, CI5$l, CI5$u)

# Assemble and render the summary table.
hats <- cbind(col1, col2, col3, col4, col5)
colnames(hats) <- c("M1 = 10", "M2 = 100", "M3 = 1000", "M4 = 10000", "M5 = 100000") 
rownames(hats) <- c("Est.", "Lower CI", "Upper CI")
knitr::kable(hats, caption = "Monte Carlo Estimation for S_T, varying M")
Monte Carlo Estimation for S_T, varying M
M1 = 10 M2 = 100 M3 = 1000 M4 = 10000 M5 = 100000
Est. 12.84100 12.51920 12.31204 12.35939 12.37081
Lower CI 11.45525 12.11983 12.18132 12.31775 12.35765
Upper CI 14.22674 12.91857 12.44276 12.40103 12.38396

Now, what is the analytical solution for \(\mathbb{E}(S_T)\)? The analytical solution is

# Analytical expectation of S_T under GBM: E(S_T) = s0 * exp(mu * T1).
# (S_T = s0*exp((mu - sigma^2/2)*T1 + sigma*W_{T1}) is lognormal; the
# exp(sigma^2/2 * T1) correction from the lognormal mean cancels the
# -sigma^2/2 in the exponent, leaving s0*exp(mu*T1). The previous formula
# s0*exp(mu + sigma^2/2) was incorrect.)
# NOTE(review): the testing block above overwrote s0 to 10 (and t to [0, 5]);
# reset s0 <- 12, T1 <- 1 before evaluating this for Problem A.
s0*exp(mu*T1)

Explain the differences between the estimators and why the confidence intervals differ.

Now we fix the number of paths \(M^* = 1000\) and vary the values of n, i.e. the number of steps, while repeating the discussion done above.

# Fixed number of paths, varying number of time steps.
M.star <- 1000
n1 <- 12
n2 <- 24
n3 <- 250
n4 <- 1000
# Simulate M.star paths on each grid. The time grid is loop-invariant, so it
# is now built once per grid instead of being recomputed inside every loop
# iteration (the original rebuilt seq(...) M.star times per grid).
t1 <- seq(0, T1, length.out = n1+1) # n steps means n+1 points in this vector.
n1.paths <- matrix(NA_real_, nrow = M.star, ncol = n1 + 1)
for (i in seq_len(M.star)){
  n1.paths[i,] <- price.path(s0, t1, mu, sigma)
}

t2 <- seq(0, T1, length.out = n2+1)
n2.paths <- matrix(NA_real_, nrow = M.star, ncol = n2 + 1)
for (i in seq_len(M.star)){
  n2.paths[i,] <- price.path(s0, t2, mu, sigma)
}

t3 <- seq(0, T1, length.out = n3+1)
n3.paths <- matrix(NA_real_, nrow = M.star, ncol = n3 + 1)
for (i in seq_len(M.star)){
  n3.paths[i,] <- price.path(s0, t3, mu, sigma)
}

t4 <- seq(0, T1, length.out = n4+1)
n4.paths <- matrix(NA_real_, nrow = M.star, ncol = n4 + 1)
for (i in seq_len(M.star)){
  n4.paths[i,] <- price.path(s0, t4, mu, sigma)
}
# NOTE: the transpose could be avoided by filling the matrices column-wise.
matplot(t(n1.paths), type = "l")
matplot(t(n2.paths), type = "l")
matplot(t(n3.paths), type = "l")
matplot(t(n4.paths), type = "l")
# Point estimate and 95% CI of E(S_T) with M* = 1000 paths, one per step count.
n1.hat <- hat_ST(n1.paths, n1) 
CI1 <- CI_ST(n1.paths, n1)
n2.hat <- hat_ST(n2.paths, n2) 
CI2 <- CI_ST(n2.paths, n2)
n3.hat <- hat_ST(n3.paths, n3) 
CI3 <- CI_ST(n3.paths, n3)
n4.hat <- hat_ST(n4.paths, n4) 
CI4 <- CI_ST(n4.paths, n4)

# One column per n: estimate, lower CI bound, upper CI bound.
col1 <- rbind(n1.hat, CI1$l, CI1$u)
col2 <- rbind(n2.hat, CI2$l, CI2$u)
col3 <- rbind(n3.hat, CI3$l, CI3$u)
col4 <- rbind(n4.hat, CI4$l, CI4$u)

# Assemble and render the summary table.
hats <- cbind(col1, col2, col3, col4)
colnames(hats) <- c("n1 = 12", "n2 = 24", "n3 = 250", "n4 = 1000")
rownames(hats) <- c("Est.", "Lower CI", "Upper CI")
knitr::kable(hats, caption = "Monte Carlo Estimation for S_T, varying n")
Monte Carlo Estimation for S_T, varying n
n1 = 12 n2 = 24 n3 = 250 n4 = 1000
Est. 10.30598 10.24194 10.44583 10.33804
Lower CI 10.20062 10.13197 10.33664 10.22854
Upper CI 10.41135 10.35191 10.55502 10.44755

Yes, the results differ (again). Discuss why!!

2 Problem B - Option Pricing I

# Problem B parameter set (European call option).
set.seed(061999) # Reproducibility.
s0 <- 24 # Initial asset price S_0.
T1 <- 1.5 # Maturity, in years.
r <- 0.02 # Risk-free interest rate.
sigma <- 0.22 # Volatility.
K <- 23.5 # Strike price.
# Closed-form Black-Scholes-Merton price of a European call option at t = 0.
#
# s0:    spot price; T1: maturity; r: risk-free rate; sigma: volatility;
# K:     strike.
# Returns the fair price s0*N(d1) - K*exp(-r*T1)*N(d2).
BSM <- function(s0, T1, r, sigma, K){
  vol <- sigma * sqrt(T1) # Total volatility over the option's life.
  d1 <- (log(s0 / K) + (r + 0.5 * sigma^2) * T1) / vol
  d2 <- d1 - vol
  s0 * pnorm(d1) - K * exp(-r * T1) * pnorm(d2)
}

We calculate the price of the given Call option (parameters are given in code block above) with the Black-Scholes-Merton formula.

# Closed-form BSM price for the given parameter set; the surrounding
# parentheses make the assignment print its value.
(BSM.price <- BSM(s0, T1, r, sigma, K))
#> [1] 3.149899

As we can see, the price is approximately equal to 3.15, when rounded to 2 decimals.

We implement a Monte Carlo estimator for this option price by simulating paths with the Euler-Maruyama method for given steps and paths.

Notice that for the path-independent options, like standard European call and put options, it is not needed to save the price path as is done below. This is done to keep the function as general as possible, in order to re-use it for the rest of the assignment. THIS COULD EASILY BE CHANGED IN THIS CASE IF TOO SLOW! (simply overwriting the previous value in the loop in every iteration).

# Monte Carlo estimator of a European-style option price, with paths
# simulated by the Euler-Maruyama scheme (slide 28).
#
# M:              number of simulated paths.
# n:              number of time steps (n+1 grid points per path).
# r:              risk-free rate, used for discounting.
# s0:             initial asset price.
# mu:             drift of the simulated dynamics (mu = r under the
#                 risk-neutral measure).
# h:              step size, h = T1/n.
# sigma:          volatility.
# T1:             maturity.
# payoff.profile: function(S, K) mapping the summary price S to a payoff.
# set.S:          function mapping the whole simulated path to the summary
#                 price fed into the payoff (last value for a vanilla call).
# K:              strike, forwarded to payoff.profile. Fix: K was previously
#                 resolved from the global environment — the `K = K` callers
#                 passed through `...` was silently ignored. It is now an
#                 explicit argument, and named `K = K` calls still match.
MC <- function(M, n, r, s0, mu, h, sigma, T1, payoff.profile, set.S, K, ...){
  C.values <- rep(NA_real_, length.out = M) # Payoff of each simulated path.
  for (i in seq_len(M)){ # One iteration per path.
    Z <- rnorm(n) # n shocks for n steps (was n+1: one draw was wasted).
    S.values <- rep(NA_real_, length.out = n+1) # Price path storage.
    S.values[1] <- s0 # Starting price at t = 0.
    for (j in 2:(n+1)){ # Euler-Maruyama recursion for the price path.
      S.values[j] <- S.values[j-1] + mu*S.values[j-1]*h + sigma*S.values[j-1]*sqrt(h)*Z[j-1]
    }
    S <- set.S(S.values) # Summary price used in the payoff profile.
    # Generality is kept using the helper function set.S.
    C.values[i] <- payoff.profile(S, K) # Payoff for this path.
  }
  exp(-r*T1)*mean(C.values) # Discounted average payoff = the MC estimator.
}
# Helper functions to feed into the MC estimator above.
# Payoff of a European call option: (S - K)^+ for a scalar summary price S.
Call.profile <- function(S, K){
  max(S - K, 0)
}

# Summary-price helper for the MC estimator: a vanilla call is
# path-independent, so only the terminal price of the path matters.
Call.set.S <- function(S.values){
  tail(S.values, 1) # Last grid point = S_T.
}

Assuming that \(\mu = r\).

# Grid of (M, n) combinations for the MC price estimate.
M.list <- c(10, 100, 1000, 10000, 100000)
n.list <- c(10, 100, 1000)
# Simulate paths with the MC estimator and Euler-Maruyama. 
combinations <- matrix(rep(NA, length.out = length(M.list)*length(n.list)), nrow = length(M.list))

for (i in 1:length(M.list)){
  for (j in 1:length(n.list)){
    h <- T1/n.list[j] # Step size for this number of steps.
    combinations[i, j] <- MC(M = M.list[i], n = n.list[j], r = r, s0 = s0, mu = r, h = h, 
                             sigma = sigma, T1 = T1, payoff.profile = Call.profile, set.S = Call.set.S, K = K)
  }
}
# Relative and absolute error versus the closed-form BSM price.
rel.errors <- (BSM.price - combinations)/BSM.price
abs.errors <- abs(BSM.price - combinations)
# Tabulate the results.
df.rel <- data.frame(rel.errors)
rownames(df.rel) <- c("M = 10", "M = 100", "M = 1000", "M = 10000", "M = 100000")
colnames(df.rel) <- c("n = 10", "n = 100", "n = 1000")

df.abs <- data.frame(abs.errors)
rownames(df.abs) <- c("M = 10", "M = 100", "M = 1000", "M = 10000", "M = 100000")
colnames(df.abs) <- c("n = 10", "n = 100", "n = 1000")

# Render the relative-error table for the report.
knitr::kable(df.rel, caption = "Relative Errors")
Relative Errors
n = 10 n = 100 n = 1000
M = 10 -0.5821167 0.5566707 0.6125545
M = 100 0.0255317 0.0021910 -0.3364722
M = 1000 0.0522963 0.0273627 0.0263294
M = 10000 -0.0066211 0.0508549 0.0291815
M = 100000 -0.0035652 -0.0019417 0.0029947
# Render the absolute-error table for the report.
knitr::kable(df.abs, caption = "Absolute Errors")
Absolute Errors
n = 10 n = 100 n = 1000
M = 10 1.8336091 1.7534568 1.9294852
M = 100 0.0804222 0.0069013 1.0598536
M = 1000 0.1647280 0.0861897 0.0829349
M = 10000 0.0208557 0.1601878 0.0919187
M = 100000 0.0112299 0.0061162 0.0094331

Interpretation of results in view of \(n\) and \(M\).

Testing against the worked examples in the slides.

# Worked Euler-Maruyama example from slide 21, Section 3: a fixed shock
# vector so the resulting path can be checked against the slides.
mu <- 0.05
sigma <- 0.2
s0 <- 10
T1 <- 1
n <- 5
h <- 1/n

Z <- rnorm(n) # This draw is immediately discarded; kept so the RNG stream advances as before.
Z <- c(0.5377, 1.8339, -2.2588, 0.8622, 0.3188)
# Accumulate the Euler-Maruyama recursion with Reduce: each step maps the
# current price s and shock z to s + mu*s*h + sigma*s*sqrt(h)*z, and
# accumulate = TRUE keeps every intermediate price (same arithmetic order
# as the original explicit loop).
S.values <- Reduce(
  function(s, z) s + mu*s*h + sigma*s*sqrt(h)*z,
  Z, init = s0, accumulate = TRUE
)
S.values
S.values[length(S.values)] # Terminal price S_T.
# Matches the slide values, so the inner Euler-Maruyama recursion is
# implemented correctly at least.
# Redefinition of the MC estimator used while checking against the slides.
# Fixes relative to the original:
#  * K is now an explicit argument — it was previously picked up from the
#    global environment even though callers passed it through `...`.
#  * set.S is called with the path only: the original set.S(S.values, n)
#    two-argument call is incompatible with the one-argument helpers
#    defined in this file (e.g. Call.set.S) and would error at runtime.
MC <- function(M, n, r, s0, mu, h, sigma, T1, payoff.profile, set.S, K, ...){
  C.values <- rep(NA_real_, length.out = M) # Payoff of each simulated path.
  for (i in seq_len(M)){
    Z <- rnorm(n) # One standard-normal shock per step.
    S.values <- rep(NA_real_, length.out = n+1)
    S.values[1] <- s0 # Price at t = 0.
    for (j in 2:(n+1)){ # Euler-Maruyama recursion.
      S.values[j] <- S.values[j-1] + mu*S.values[j-1]*h + sigma*S.values[j-1]*sqrt(h)*Z[j-1]
    }
    S <- set.S(S.values) # Generality is kept using a helper function set.S.
    C.values[i] <- payoff.profile(S, K)
  }
  exp(-r*T1)*mean(C.values) # Return discounted average payoff.
}

3 Problem C - Option Pricing II

A Monte Carlo estimator is implemented for a specific Asian Call Option.

set.seed(061999) # Reproducibility for Problem C.

# Summary-price helper for an arithmetic-average Asian option: the payoff
# is applied to the average price over the monitoring dates instead of S_T,
# so the same Call.profile payoff function can be reused.
# (The original was an empty stub that returned NULL.)
#
# S.values: one simulated price path.
# idx:      indices of the monitoring dates within the path. The assignment
#           asks for averaging over every Friday; which grid indices are
#           Fridays depends on the discretization, so they are passed in.
#           Defaults to all points (continuous-monitoring approximation) —
#           NOTE(review): confirm the Friday indices against the grid used.
Asian.set.S <- function(S.values, idx = seq_along(S.values)){
  mean(S.values[idx])
}